From 2a9780ae0c45670b9f4521386be662021cfe9b51 Mon Sep 17 00:00:00 2001 From: "kaf24@scramble.cl.cam.ac.uk" Date: Wed, 14 Jul 2004 17:36:29 +0000 Subject: [PATCH] bitkeeper revision 1.1086.2.1 (40f56f1dUhjhI2VI1m-7iYS5gF4jqA) x86-64 now executes as far as the boot-time banner. --- .rootkeys | 1 - README.CD | 2 +- docs/HOWTOs/Xen-HOWTO | 2 +- xen/arch/x86/boot/x86_32.S | 16 +- xen/arch/x86/boot/x86_64.S | 43 +++-- xen/arch/x86/x86_32/mm.c | 8 +- xen/common/kernel.c | 8 +- xen/include/asm-x86/config.h | 5 + xen/include/asm-x86/page.h | 82 +++++--- xen/include/asm-x86/x86_64/page.h | 300 ------------------------------ 10 files changed, 105 insertions(+), 362 deletions(-) delete mode 100644 xen/include/asm-x86/x86_64/page.h diff --git a/.rootkeys b/.rootkeys index 0d34efe94b..59727210ec 100644 --- a/.rootkeys +++ b/.rootkeys @@ -585,7 +585,6 @@ 404f1b9ceJeGVaPNIENm2FkK0AgEOQ xen/include/asm-x86/x86_64/current.h 404f1b9fl6AQ_a-T1TDK3fuwTPXmHw xen/include/asm-x86/x86_64/desc.h 404f1badfXZJZ2sU8sh9PS2EZvd19Q xen/include/asm-x86/x86_64/ldt.h -404f1bb1LSCqrMDSfRAti5NdMQPJBQ xen/include/asm-x86/x86_64/page.h 404f1bb86rAXB3aLS1vYdcqpJiEcyg xen/include/asm-x86/x86_64/ptrace.h 40e1966azOJZfNI6Ilthe6Q-T3Hewg xen/include/asm-x86/x86_64/string.h 404f1bc4tWkB9Qr8RkKtZGW5eMQzhw xen/include/asm-x86/x86_64/uaccess.h diff --git a/README.CD b/README.CD index be17acfc54..a0e47f3183 100644 --- a/README.CD +++ b/README.CD @@ -305,7 +305,7 @@ that may be able to help diagnose problems: shared by two subsystems (eg. console and debugger). Sharing is controlled by MSB of each transmitted/received character. - [NB. Default for this option is 'com1,tty'] + [NB. 
Default for this option is 'com1,vga'] conswitch= Specify how to switch serial-console input between diff --git a/docs/HOWTOs/Xen-HOWTO b/docs/HOWTOs/Xen-HOWTO index b0ec801659..eee9251577 100644 --- a/docs/HOWTOs/Xen-HOWTO +++ b/docs/HOWTOs/Xen-HOWTO @@ -251,7 +251,7 @@ The following is a list of command line arguments to pass to Xen: shared by two subsystems (eg. console and debugger). Sharing is controlled by MSB of each transmitted/received character. - [NB. Default for this option is 'com1,tty'] + [NB. Default for this option is 'com1,vga'] conswitch= Specify how to switch serial-console input between diff --git a/xen/arch/x86/boot/x86_32.S b/xen/arch/x86/boot/x86_32.S index fea54c1336..ce09feb6e8 100644 --- a/xen/arch/x86/boot/x86_32.S +++ b/xen/arch/x86/boot/x86_32.S @@ -20,9 +20,15 @@ ENTRY(start) .long -0x1BADB004 bad_cpu_msg: - .asciz "Bad CPU: we need at least a P6-compatible core." + .asciz "ERR: Not a P6-compatible CPU!" +not_multiboot_msg: + .asciz "ERR: Not a Multiboot bootloader!" bad_cpu: mov $SYMBOL_NAME(bad_cpu_msg)-__PAGE_OFFSET,%esi + jmp print_err +not_multiboot: + mov $SYMBOL_NAME(not_multiboot_msg)-__PAGE_OFFSET,%esi +print_err: mov $0xB8000,%edi # VGA framebuffer 1: mov (%esi),%bl test %bl,%bl # Terminate on '\0' sentinel @@ -79,9 +85,13 @@ __start: cmp $(SECONDARY_CPU_FLAG),%ebx je start_paging + /* Check for Multiboot bootloader */ + cmp $0x2BADB002,%eax + jne not_multiboot + + /* Save the Multiboot info structure for later use. */ add $__PAGE_OFFSET,%ebx - push %ebx /* Multiboot info struct */ - push %eax /* Multiboot magic value */ + push %ebx /* Initialize BSS (no nasty surprises!) */ mov $__bss_start-__PAGE_OFFSET,%edi diff --git a/xen/arch/x86/boot/x86_64.S b/xen/arch/x86/boot/x86_64.S index a765141910..73a23023e2 100644 --- a/xen/arch/x86/boot/x86_64.S +++ b/xen/arch/x86/boot/x86_64.S @@ -21,9 +21,15 @@ ENTRY(start) .long -0x1BADB004 .org 0x010 - .asciz "Bad CPU: does not support 64-bit (long) mode." 
+ .asciz "ERR: Not a 64-bit CPU!" + .org 0x028 + .asciz "ERR: Not a Multiboot bootloader!" bad_cpu: mov $0x100010,%esi # Error message + jmp print_err +not_multiboot: + mov $0x100028,%esi # Error message +print_err: mov $0xB8000,%edi # VGA framebuffer 1: mov (%esi),%bl test %bl,%bl # Terminate on '\0' sentinel @@ -50,6 +56,16 @@ __start: mov %ecx,%ds mov %ecx,%es + /* Check for Multiboot bootloader */ + cmp $(SECONDARY_CPU_FLAG),%ebx + je skip_multiboot_check + cmp $0x2BADB002,%eax + jne not_multiboot +skip_multiboot_check: + + /* Save the Multiboot info structure for later use. */ + mov %ebx,0x1001e0 + /* We begin by interrogating the CPU for the presence of long mode. */ mov $0x80000000,%eax cpuid @@ -67,9 +83,6 @@ __start: mov $0x20,%ecx # X86_CR4_PAE mov %ecx,%cr4 - mov %ebx,0x1001e0 /* Multiboot info struct */ - mov %eax,0x1001e4 /* Multiboot magic value */ - /* Load pagetable base register. */ mov $0x101000,%eax /* idle_pg_table */ mov %eax,%cr3 @@ -138,14 +151,11 @@ __high_start: add $8,%rdi loop 1b - mov 0x1001e0,%eax /* Multiboot info struct */ - lea start(%rip),%rbx - sub $0x100000,%rbx - add %rbx,%rax - push %rax - mov 0x1001e4,%eax /* Multiboot magic value */ - push %rax - + /* Pass off the Multiboot info structure to C land. */ + mov 0x1001e0,%edi + lea start(%rip),%rax + sub $0x100000,%rax + add %rax,%rdi call cmain /* This is the default interrupt handler. 
*/ @@ -156,8 +166,7 @@ ignore_int: mov $(__HYPERVISOR_DS),%eax mov %eax,%ds mov %eax,%es - lea int_msg(%rip),%rax - push %rax + lea int_msg(%rip),%rdi call SYMBOL_NAME(printf) 1: jmp 1b @@ -267,9 +276,11 @@ continue_nonidle_task: __get_user_1: paging_init: trap_init: -.globl __get_user_8, zap_low_mappings, set_debugreg +.globl __get_user_8, zap_low_mappings, set_debugreg,synchronise_pagetables __get_user_8: zap_low_mappings: set_debugreg: - +synchronise_pagetables: +.globl destroy_gdt +destroy_gdt: diff --git a/xen/arch/x86/x86_32/mm.c b/xen/arch/x86/x86_32/mm.c index d350689071..85e9ff1e69 100644 --- a/xen/arch/x86/x86_32/mm.c +++ b/xen/arch/x86/x86_32/mm.c @@ -68,7 +68,7 @@ static void __init fixrange_init(unsigned long start, for ( ; (i < ENTRIES_PER_L2_PAGETABLE) && (vaddr != end); l2e++, i++ ) { - if ( !l2_pgentry_empty(*l2e) ) + if ( l2_pgentry_val(*l2e) != 0 ) continue; page = (unsigned long)get_free_page(); clear_page(page); @@ -104,9 +104,9 @@ void __init paging_init(void) /* Create read-only mapping of MPT for guest-OS use. */ idle_pg_table[RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT] = - idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]; - mk_l2_readonly(idle_pg_table + - (RO_MPT_VIRT_START >> L2_PAGETABLE_SHIFT)); + mk_l2_pgentry(l2_pgentry_val( + idle_pg_table[RDWR_MPT_VIRT_START >> L2_PAGETABLE_SHIFT]) & + ~_PAGE_RW); /* Set up mapping cache for domain pages. 
*/ mapcache = (unsigned long *)get_free_page(); diff --git a/xen/common/kernel.c b/xen/common/kernel.c index b90d467f9e..7facb69cac 100644 --- a/xen/common/kernel.c +++ b/xen/common/kernel.c @@ -108,7 +108,7 @@ static struct { }; -void cmain(unsigned long magic, multiboot_info_t *mbi) +void cmain(multiboot_info_t *mbi) { struct domain *new_dom; unsigned long max_page; @@ -173,12 +173,6 @@ void cmain(unsigned long magic, multiboot_info_t *mbi) XEN_COMPILER, XEN_COMPILE_DATE); set_printk_prefix("(XEN) "); - if ( magic != MULTIBOOT_BOOTLOADER_MAGIC ) - { - printk("FATAL ERROR: Invalid magic number: 0x%08lx\n", magic); - for ( ; ; ) ; - } - /* We require memory and module information. */ if ( (mbi->flags & 9) != 9 ) { diff --git a/xen/include/asm-x86/config.h b/xen/include/asm-x86/config.h index e950f8112a..496ebad1c2 100644 --- a/xen/include/asm-x86/config.h +++ b/xen/include/asm-x86/config.h @@ -94,6 +94,11 @@ extern void __out_of_line_bug(int line) __attribute__((noreturn)); #define out_of_line_bug() __out_of_line_bug(__LINE__) #endif /* __ASSEMBLY__ */ +#define BUG() do { \ + printk("BUG at %s:%d\n", __FILE__, __LINE__); \ + __asm__ __volatile__("ud2"); \ +} while (0) + #if defined(__x86_64__) #define XENHEAP_DEFAULT_MB (16) diff --git a/xen/include/asm-x86/page.h b/xen/include/asm-x86/page.h index 8e4a7ec093..c7a8905064 100644 --- a/xen/include/asm-x86/page.h +++ b/xen/include/asm-x86/page.h @@ -1,10 +1,27 @@ -#ifndef _I386_PAGE_H -#define _I386_PAGE_H +/****************************************************************************** + * asm-x86/page.h + * + * Definitions relating to page tables. 
+ */ + +#ifndef __X86_PAGE_H__ +#define __X86_PAGE_H__ -#define BUG() do { \ - printk("BUG at %s:%d\n", __FILE__, __LINE__); \ - __asm__ __volatile__("ud2"); \ -} while (0) +#if defined(__x86_64__) + +#define L1_PAGETABLE_SHIFT 12 +#define L2_PAGETABLE_SHIFT 21 +#define L3_PAGETABLE_SHIFT 30 +#define L4_PAGETABLE_SHIFT 39 + +#define ENTRIES_PER_L1_PAGETABLE 512 +#define ENTRIES_PER_L2_PAGETABLE 512 +#define ENTRIES_PER_L3_PAGETABLE 512 +#define ENTRIES_PER_L4_PAGETABLE 512 + +#define __PAGE_OFFSET (0xFFFF830000000000) + +#elif defined(__i386__) #define L1_PAGETABLE_SHIFT 12 #define L2_PAGETABLE_SHIFT 22 @@ -12,6 +29,10 @@ #define ENTRIES_PER_L1_PAGETABLE 1024 #define ENTRIES_PER_L2_PAGETABLE 1024 +#define __PAGE_OFFSET (0xFC400000) + +#endif + #define PAGE_SHIFT L1_PAGETABLE_SHIFT #define PAGE_SIZE (1UL << PAGE_SHIFT) #define PAGE_MASK (~(PAGE_SIZE-1)) @@ -23,44 +44,60 @@ #include typedef struct { unsigned long l1_lo; } l1_pgentry_t; typedef struct { unsigned long l2_lo; } l2_pgentry_t; -typedef l1_pgentry_t *l1_pagetable_t; -typedef l2_pgentry_t *l2_pagetable_t; +typedef struct { unsigned long l3_lo; } l3_pgentry_t; +typedef struct { unsigned long l4_lo; } l4_pgentry_t; typedef struct { unsigned long pt_lo; } pagetable_t; #endif /* !__ASSEMBLY__ */ /* Strip type from a table entry. */ #define l1_pgentry_val(_x) ((_x).l1_lo) #define l2_pgentry_val(_x) ((_x).l2_lo) +#define l3_pgentry_val(_x) ((_x).l3_lo) +#define l4_pgentry_val(_x) ((_x).l4_lo) #define pagetable_val(_x) ((_x).pt_lo) /* Add type to a table entry. */ #define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } ) #define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } ) +#define mk_l3_pgentry(_x) ( (l3_pgentry_t) { (_x) } ) +#define mk_l4_pgentry(_x) ( (l4_pgentry_t) { (_x) } ) #define mk_pagetable(_x) ( (pagetable_t) { (_x) } ) /* Turn a typed table entry into a page index. 
*/ #define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT) #define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT) +#define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT) +#define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT) /* Turn a typed table entry into a physical address. */ #define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK) #define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK) +#define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & PAGE_MASK) +#define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & PAGE_MASK) -/* Dereference a typed level-2 entry to yield a typed level-1 table. */ -#define l2_pgentry_to_l1(_x) \ +/* Pagetable walking. */ +#define l2_pgentry_to_l1(_x) \ ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK)) +#define l3_pgentry_to_l2(_x) \ + ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK)) +#define l4_pgentry_to_l3(_x) \ + ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK)) /* Given a virtual address, get an entry offset into a page table. */ #define l1_table_offset(_a) \ (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1)) +#if defined(__i386__) #define l2_table_offset(_a) \ ((_a) >> L2_PAGETABLE_SHIFT) +#elif defined(__x86_64__) +#define l2_table_offset(_a) \ + (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE -1)) +#define l3_table_offset(_a) \ + (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE -1)) +#define l4_table_offset(_a) \ + ((_a) >> L4_PAGETABLE_SHIFT) +#endif -/* Hypervisor table entries use zero to sugnify 'empty'. 
*/
-#define l1_pgentry_empty(_x) (!l1_pgentry_val(_x))
-#define l2_pgentry_empty(_x) (!l2_pgentry_val(_x))
-
-#define __PAGE_OFFSET (0xFC400000)
 #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
 #define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
@@ -148,25 +185,12 @@ __asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr)))
 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define __PAGE_HYPERVISOR_NOCACHE \
 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED)
-#define __PAGE_HYPERVISOR_RO \
- (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
 #define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL)
 #define PAGE_HYPERVISOR MAKE_GLOBAL(__PAGE_HYPERVISOR)
-#define PAGE_HYPERVISOR_RO MAKE_GLOBAL(__PAGE_HYPERVISOR_RO)
 #define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE)
-#define mk_l2_writeable(_p) \
- (*(_p) = mk_l2_pgentry(l2_pgentry_val(*(_p)) | _PAGE_RW))
-#define mk_l2_readonly(_p) \
- (*(_p) = mk_l2_pgentry(l2_pgentry_val(*(_p)) & ~_PAGE_RW))
-#define mk_l1_writeable(_p) \
- (*(_p) = mk_l1_pgentry(l1_pgentry_val(*(_p)) | _PAGE_RW))
-#define mk_l1_readonly(_p) \
- (*(_p) = mk_l1_pgentry(l1_pgentry_val(*(_p)) & ~_PAGE_RW))
-
-
 #ifndef __ASSEMBLY__
 static __inline__ int get_order(unsigned long size)
 {
@@ -184,4 +208,4 @@ static __inline__ int get_order(unsigned long size)
 extern void zap_low_mappings(void);
 #endif
-#endif /* _I386_PAGE_H */
+#endif /* __X86_PAGE_H__ */
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
deleted file mode 100644
index 919fd2d80a..0000000000
--- a/xen/include/asm-x86/x86_64/page.h
+++ /dev/null
@@ -1,300 +0,0 @@
-#ifndef _X86_64_PAGE_H
-#define _X86_64_PAGE_H
-
-#define BUG() do { \
- printk("BUG at %s:%d\n", __FILE__, __LINE__); \
- __asm__ __volatile__("ud2"); \
-} while (0)
-
-#define __PHYSICAL_MASK 0x0000ffffffffffffUL
-#define PHYSICAL_PAGE_MASK 0x0000fffffffff000UL
-#define PTE_MASK PHYSICAL_PAGE_MASK
-
-/* 
PAGE_SHIFT determines the page size */ -#define PAGE_SHIFT 12 -#ifdef __ASSEMBLY__ -#define PAGE_SIZE (0x1 << PAGE_SHIFT) -#else -#define PAGE_SIZE (1UL << PAGE_SHIFT) -#endif -#define PAGE_MASK (~(PAGE_SIZE-1)) -#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1)) -#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT) - -#define L1_PAGETABLE_SHIFT 12 -#define L2_PAGETABLE_SHIFT 21 -#define L3_PAGETABLE_SHIFT 30 -#define L4_PAGETABLE_SHIFT 39 -#define LARGE_PFN (LARGE_PAGE_SIZE / PAGE_SIZE) - -#define ENTRIES_PER_L1_PAGETABLE 512 -#define ENTRIES_PER_L2_PAGETABLE 512 -#define ENTRIES_PER_L3_PAGETABLE 512 -#define ENTRIES_PER_L4_PAGETABLE 512 - -#define KERNEL_TEXT_SIZE (40UL*1024*1024) -#define KERNEL_TEXT_START 0xffffffff80000000UL - -/* Changing the next two defines should be enough to increase the kernel stack */ -/* We still hope 8K is enough, but ... */ -#define THREAD_ORDER 1 -#define THREAD_SIZE (2*PAGE_SIZE) - -#define INIT_TASK_SIZE THREAD_SIZE -#define CURRENT_MASK (~(THREAD_SIZE-1)) - -#define clear_page(_p) memset((void *)(_p), 0, PAGE_SIZE) -#define copy_page(_t,_f) memcpy((void *)(_t), (void *)(_f), PAGE_SIZE) - -#ifndef __ASSEMBLY__ -#include -typedef struct { unsigned long l1_lo; } l1_pgentry_t; -typedef struct { unsigned long l2_lo; } l2_pgentry_t; -typedef struct { unsigned long l3_lo; } l3_pgentry_t; -typedef struct { unsigned long l4_lo; } l4_pgentry_t; -typedef l1_pgentry_t *l1_pagetable_t; -typedef l2_pgentry_t *l2_pagetable_t; -typedef l3_pgentry_t *l3_pagetable_t; -typedef l4_pgentry_t *l4_pagetable_t; -typedef struct { unsigned long pt_lo; } pagetable_t; -typedef struct { unsigned long pgprot; } pgprot_t; -#endif /* !__ASSEMBLY__ */ - -/* Strip type from a table entry. */ -#define l1_pgentry_val(_x) ((_x).l1_lo) -#define l2_pgentry_val(_x) ((_x).l2_lo) -#define l3_pgentry_val(_x) ((_x).l3_lo) -#define l4_pgentry_val(_x) ((_x).l4_lo) -#define pagetable_val(_x) ((_x).pt_lo) - -/* Add type to a table entry. 
*/ -#define mk_l1_pgentry(_x) ( (l1_pgentry_t) { (_x) } ) -#define mk_l2_pgentry(_x) ( (l2_pgentry_t) { (_x) } ) -#define mk_l3_pgentry(_x) ( (l3_pgentry_t) { (_x) } ) -#define mk_l4_pgentry(_x) ( (l4_pgentry_t) { (_x) } ) -#define mk_pagetable(_x) ( (pagetable_t) { (_x) } ) - -/* Turn a typed table entry into a page index. */ -#define l1_pgentry_to_pagenr(_x) (l1_pgentry_val(_x) >> PAGE_SHIFT) -#define l2_pgentry_to_pagenr(_x) (l2_pgentry_val(_x) >> PAGE_SHIFT) -#define l3_pgentry_to_pagenr(_x) (l3_pgentry_val(_x) >> PAGE_SHIFT) -#define l4_pgentry_to_pagenr(_x) (l4_pgentry_val(_x) >> PAGE_SHIFT) - -/* Turn a typed table entry into a physical address. */ -#define l1_pgentry_to_phys(_x) (l1_pgentry_val(_x) & PAGE_MASK) -#define l2_pgentry_to_phys(_x) (l2_pgentry_val(_x) & PAGE_MASK) -#define l3_pgentry_to_phys(_x) (l3_pgentry_val(_x) & PAGE_MASK) -#define l4_pgentry_to_phys(_x) (l4_pgentry_val(_x) & PAGE_MASK) - -/* Dereference a typed level-2 entry to yield a typed level-1 table. */ -#define l2_pgentry_to_l1(_x) \ - ((l1_pgentry_t *)__va(l2_pgentry_val(_x) & PAGE_MASK)) - -/* Dereference a typed level-4 entry to yield a typed level-3 table. */ -#define l4_pgentry_to_l3(_x) \ - ((l3_pgentry_t *)__va(l4_pgentry_val(_x) & PAGE_MASK)) - -/* Dereference a typed level-3 entry to yield a typed level-2 table. */ -#define l3_pgentry_to_l2(_x) \ - ((l2_pgentry_t *)__va(l3_pgentry_val(_x) & PAGE_MASK)) - -/* Given a virtual address, get an entry offset into a page table. */ -#define l1_table_offset(_a) \ - (((_a) >> L1_PAGETABLE_SHIFT) & (ENTRIES_PER_L1_PAGETABLE - 1)) -#define l2_table_offset(_a) \ - (((_a) >> L2_PAGETABLE_SHIFT) & (ENTRIES_PER_L2_PAGETABLE - 1)) -#define l3_table_offset(_a) \ - (((_a) >> L3_PAGETABLE_SHIFT) & (ENTRIES_PER_L3_PAGETABLE - 1)) -#define l4_table_offset(_a) \ - ((_a) >> L4_PAGETABLE_SHIFT) - -/* Hypervisor table entries use zero to sugnify 'empty'. 
*/ -#define l1_pgentry_empty(_x) (!l1_pgentry_val(_x)) -#define l2_pgentry_empty(_x) (!l2_pgentry_val(_x)) -#define l3_pgentry_empty(_x) (!l3_pgentry_val(_x)) -#define l4_pgentry_empty(_x) (!l4_pgentry_val(_x)) - - -#define pgprot_val(x) ((x).pgprot) -#define __pgprot(x) ((pgprot_t) { (x) } ) - -#define clear_user_page(page, vaddr) clear_page(page) -#define copy_user_page(to, from, vaddr) copy_page(to, from) - -/* to align the pointer to the (next) page boundary */ -#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) - -/* - * NB. We don't currently track I/O holes in the physical RAM space. - * For now we guess that I/O devices will be mapped in the first 1MB - * (e.g., VGA buffers) or beyond the end of physical RAM. - */ -#define pfn_is_ram(_pfn) (((_pfn) > 0x100) && ((_pfn) < max_page)) - -/* High table entries are reserved by the hypervisor. */ -#define DOMAIN_ENTRIES_PER_L4_PAGETABLE \ - (HYPERVISOR_VIRT_START >> L4_PAGETABLE_SHIFT) -#define HYPERVISOR_ENTRIES_PER_L4_PAGETABLE \ - (ENTRIES_PER_L4_PAGETABLE - DOMAIN_ENTRIES_PER_L4_PAGETABLE) - -#define __START_KERNEL 0xffffffff80100000 -#define __START_KERNEL_map 0xffffffff80000000 -#define __PAGE_OFFSET 0x0000010000000000 -#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) - -#ifndef __ASSEMBLY__ -#include -#include -#include -#include - -extern unsigned long vm_stack_flags, vm_stack_flags32; -extern unsigned long vm_data_default_flags, vm_data_default_flags32; -extern unsigned long vm_force_exec32; - -#define linear_pg_table ((l1_pgentry_t *)LINEAR_PT_VIRT_START) - -extern l2_pgentry_t idle_pg_table[ENTRIES_PER_L2_PAGETABLE]; -extern void paging_init(void); - -#define __flush_tlb() \ - do { \ - __asm__ __volatile__ ( \ - "movl %%cr3, %%eax; movl %%eax, %%cr3" \ - : : : "memory", "eax" ); \ - tlb_clocktick(); \ - } while ( 0 ) - -/* Flush global pages as well. 
*/ - -#define __pge_off() \ - do { \ - __asm__ __volatile__( \ - "movl %0, %%cr4; # turn off PGE " \ - :: "r" (mmu_cr4_features & ~X86_CR4_PGE)); \ - } while (0) - -#define __pge_on() \ - do { \ - __asm__ __volatile__( \ - "movl %0, %%cr4; # turn off PGE " \ - :: "r" (mmu_cr4_features)); \ - } while (0) - - -#define __flush_tlb_pge() \ - do { \ - __pge_off(); \ - __flush_tlb(); \ - __pge_on(); \ - } while (0) - -#define __flush_tlb_one(__addr) \ -__asm__ __volatile__("invlpg %0": :"m" (*(char *) (__addr))) - -#include - -/* - * Tell the user there is some problem. The exception handler decodes this frame. - */ -struct bug_frame { - unsigned char ud2[2]; - char *filename; /* should use 32bit offset instead, but the assembler doesn't like it */ - unsigned short line; -} __attribute__((packed)); -#define HEADER_BUG() asm volatile("ud2 ; .quad %P1 ; .short %P0" :: "i"(__LINE__), \ - "i" (__stringify(__FILE__))) -#define PAGE_BUG(page) BUG() - -#endif /* ASSEMBLY */ - -#define _PAGE_PRESENT 0x001 -#define _PAGE_RW 0x002 -#define _PAGE_USER 0x004 -#define _PAGE_PWT 0x008 -#define _PAGE_PCD 0x010 -#define _PAGE_ACCESSED 0x020 -#define _PAGE_DIRTY 0x040 -#define _PAGE_PAT 0x080 -#define _PAGE_PSE 0x080 -#define _PAGE_GLOBAL 0x100 - -#define __PAGE_HYPERVISOR \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) -#define __PAGE_HYPERVISOR_NOCACHE \ - (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED) -#define __PAGE_HYPERVISOR_RO \ - (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED) - -#define MAKE_GLOBAL(_x) ((_x) | _PAGE_GLOBAL) - -#define PAGE_HYPERVISOR MAKE_GLOBAL(__PAGE_HYPERVISOR) -#define PAGE_HYPERVISOR_RO MAKE_GLOBAL(__PAGE_HYPERVISOR_RO) -#define PAGE_HYPERVISOR_NOCACHE MAKE_GLOBAL(__PAGE_HYPERVISOR_NOCACHE) - -#define mk_l4_writeable(_p) \ - (*(_p) = mk_l4_pgentry(l4_pgentry_val(*(_p)) | _PAGE_RW)) -#define mk_l4_readonly(_p) \ - (*(_p) = mk_l4_pgentry(l4_pgentry_val(*(_p)) & ~_PAGE_RW)) -#define mk_l3_writeable(_p) \ - (*(_p) = 
mk_l3_pgentry(l3_pgentry_val(*(_p)) | _PAGE_RW)) -#define mk_l3_readonly(_p) \ - (*(_p) = mk_l3_pgentry(l3_pgentry_val(*(_p)) & ~_PAGE_RW)) -#define mk_l2_writeable(_p) \ - (*(_p) = mk_l2_pgentry(l2_pgentry_val(*(_p)) | _PAGE_RW)) -#define mk_l2_readonly(_p) \ - (*(_p) = mk_l2_pgentry(l2_pgentry_val(*(_p)) & ~_PAGE_RW)) -#define mk_l1_writeable(_p) \ - (*(_p) = mk_l1_pgentry(l1_pgentry_val(*(_p)) | _PAGE_RW)) -#define mk_l1_readonly(_p) \ - (*(_p) = mk_l1_pgentry(l1_pgentry_val(*(_p)) & ~_PAGE_RW)) - -/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol. - Otherwise you risk miscompilation. */ -#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET) -/* __pa_symbol should use for C visible symbols, but only for them. - This seems to be the official gcc blessed way to do such arithmetic. */ -#define __pa_symbol(x) \ - ({unsigned long v; \ - asm("" : "=r" (v) : "0" (x)); \ - v - __START_KERNEL_map; }) -#define __pa_maybe_symbol(x) \ - ({unsigned long v; \ - asm("" : "=r" (v) : "0" (x)); \ - __pa(v); }) -#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) -#ifndef CONFIG_DISCONTIGMEM -#define virt_to_page(kaddr) (frame_table + (__pa(kaddr) >> PAGE_SHIFT)) -#define pfn_to_page(pfn) (frame_table + (pfn)) -#define page_address(_p) (__va(((_p) - frame_table) << PAGE_SHIFT)) -#define VALID_PAGE(page) (((page) - frame_table) < max_mapnr) -#endif - -#ifndef __ASSEMBLY__ -static __inline__ int get_order(unsigned long size) -{ - int order; - - size = (size-1) >> (PAGE_SHIFT-1); - order = -1; - do { - size >>= 1; - order++; - } while (size); - return order; -} -#endif - -#define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT) - -#define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) -#define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | VM_EXEC | \ - VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) - -#define 
VM_DATA_DEFAULT_FLAGS \ - ((current->thread.flags & THREAD_IA32) ? vm_data_default_flags32 : \ - vm_data_default_flags) -#define VM_STACK_FLAGS vm_stack_flags - -#endif /* _X86_64_PAGE_H */ -- 2.30.2